In this report, we extract information about published JOSS papers and generate graphics as well as a summary table that can be downloaded and used for further analyses.
suppressPackageStartupMessages({
library(tibble)
library(rcrossref)
library(dplyr)
library(tidyr)
library(ggplot2)
library(lubridate)
library(gh)
library(purrr)
library(jsonlite)
library(DT)
library(plotly)
library(citecorp)
library(readr)
})
## Keep track of the source of each column
source_track <- c()
## Determine whether to add a caption with today's date to the (non-interactive) plots
add_date_caption <- TRUE
if (add_date_caption) {
dcap <- lubridate::today()
} else {
dcap <- ""
}
## Read archived version of summary data frame, to use for filling in
## information about software repositories (due to limit on API requests)
## Sort by the date when software repo info was last obtained
papers_archive <- readRDS(gzcon(url("https://github.com/openjournals/joss-analytics/blob/gh-pages/joss_submission_analytics.rds?raw=true"))) %>%
dplyr::arrange(!is.na(repo_info_obtained), repo_info_obtained)
## Similarly for citation analysis, to avoid having to pull down the
## same information multiple times
citations_archive <- readr::read_delim(
url("https://github.com/openjournals/joss-analytics/blob/gh-pages/joss_submission_citations.tsv?raw=true"),
col_types = cols(.default = "c"), col_names = TRUE,
delim = "\t")We get the information about published JOSS papers from Crossref, using the rcrossref R package. This package is also used to extract citation counts.
## Fetch JOSS papers from Crossref
## Only 1000 papers at a time can be pulled down
lim <- 1000
papers <- rcrossref::cr_works(filter = c(issn = "2475-9066"),
limit = lim)$data
i <- 1
while (nrow(papers) == i * lim) {
papers <- dplyr::bind_rows(
papers,
rcrossref::cr_works(filter = c(issn = "2475-9066"),
limit = lim, offset = i * lim)$data)
i <- i + 1
}
papers <- papers %>%
dplyr::filter(type == "journal-article")
## A few papers don't have DOIs - generate them from the URL
noaltid <- which(is.na(papers$alternative.id))
papers$alternative.id[noaltid] <- gsub("http://dx.doi.org/", "",
papers$url[noaltid])
## Get citation info from Crossref and merge with paper details
cit <- rcrossref::cr_citation_count(doi = papers$alternative.id)
papers <- papers %>% dplyr::left_join(
cit %>% dplyr::rename(citation_count = count),
by = c("alternative.id" = "doi")
)
## Remove one duplicated paper
papers <- papers %>% dplyr::filter(alternative.id != "10.21105/joss.00688")
source_track <- c(source_track,
structure(rep("crossref", ncol(papers)),
names = colnames(papers)))
For each published paper, we use the Whedon API to get information about pre-review and review issue numbers, the corresponding software repository, and other metadata.
whedon <- list()
p <- 1
a <- jsonlite::fromJSON(
url(paste0("https://joss.theoj.org/papers/published.json?page=", p)),
simplifyDataFrame = FALSE
)
while (length(a) > 0) {
whedon <- c(whedon, a)
p <- p + 1
a <- jsonlite::fromJSON(
url(paste0("https://joss.theoj.org/papers/published.json?page=", p)),
simplifyDataFrame = FALSE
)
}
whedon <- do.call(dplyr::bind_rows, lapply(whedon, function(w) {
data.frame(api_title = w$title,
api_state = w$state,
editor = paste(w$metadata$paper$editor, collapse = ","),
reviewers = paste(w$reviewers, collapse = ","),
nbr_reviewers = length(w$reviewers),
repo_url = w$repository_url,
review_issue_id = w$review_issue_id,
doi = w$doi,
prereview_issue_id = ifelse(!is.null(w$meta_review_issue_id),
w$meta_review_issue_id, NA_integer_),
languages = paste(w$metadata$paper$languages, collapse = ","),
archive_doi = w$metadata$paper$archive_doi)
}))
papers <- papers %>% dplyr::left_join(whedon, by = c("alternative.id" = "doi"))
source_track <- c(source_track,
structure(rep("whedon", length(setdiff(colnames(papers),
names(source_track)))),
names = setdiff(colnames(papers), names(source_track))))
From each pre-review and review issue, we extract information about review times and assigned labels.
## Pull down info on all issues in the joss-reviews repository
issues <- gh("/repos/openjournals/joss-reviews/issues",
.limit = 5000, state = "all")## From each issue, extract required information
iss <- do.call(dplyr::bind_rows, lapply(issues, function(i) {
data.frame(title = i$title,
number = i$number,
state = i$state,
opened = i$created_at,
closed = ifelse(!is.null(i$closed_at),
i$closed_at, NA_character_),
ncomments = i$comments,
labels = paste(setdiff(
vapply(i$labels, getElement,
name = "name", character(1L)),
c("review", "pre-review", "query-scope", "paused")),
collapse = ","))
}))
## Split into REVIEW, PRE-REVIEW, and other issues (the latter category
## is discarded)
issother <- iss %>% dplyr::filter(!grepl("\\[PRE REVIEW\\]", title) &
!grepl("\\[REVIEW\\]", title))
dim(issother)
## [1] 135 7
head(issother)
## title
## 1 Wrong repo, please ignore
## 2 Installation instructions: Is there a clearly-stated list of dependencies? Ideally these should be handled with an automated package management solution.
## 3 A statement of need: Does the paper have a section titled 'Statement of Need' that clearly states what problems the software is designed to solve and who the target audience is?
## 4 State of the field: Do the authors describe how this software compares to other commonly-used packages?
## 5 Performance: If there are any performance claims of the software, have they been confirmed? (If there are no claims, please check off this item.)
## 6 References: Is the list of references complete, and is everything cited appropriately that should be cited (e.g., papers, datasets, software)? Do references in the text use the proper citation syntax?
## number state opened closed ncomments labels
## 1 4381 closed 2022-05-06T03:39:15Z 2022-05-06T03:39:16Z 2
## 2 4349 closed 2022-04-27T06:17:12Z 2022-04-27T06:17:14Z 1
## 3 4348 closed 2022-04-27T06:14:07Z 2022-04-27T06:14:08Z 1
## 4 4347 closed 2022-04-27T06:10:55Z 2022-04-27T06:10:56Z 2
## 5 4346 closed 2022-04-27T06:03:40Z 2022-04-27T06:03:41Z 4
## 6 4345 closed 2022-04-27T05:45:21Z 2022-04-27T05:45:22Z 1
## For REVIEW issues, generate the DOI of the paper from the issue number
## Zero-pad the issue number to five digits for the DOI suffix
getnbrzeros <- function(s) {
paste(rep(0, 5 - nchar(s)), collapse = "")
}
issrev <- iss %>% dplyr::filter(grepl("\\[REVIEW\\]", title)) %>%
dplyr::mutate(nbrzeros = purrr::map_chr(number, getnbrzeros)) %>%
dplyr::mutate(alternative.id = paste0("10.21105/joss.",
nbrzeros,
number)) %>%
dplyr::select(-nbrzeros) %>%
dplyr::mutate(title = gsub("\\[REVIEW\\]: ", "", title)) %>%
dplyr::rename_at(vars(-alternative.id), ~ paste0("review_", .))
## For pre-review and review issues, respectively, get the number of
## issues closed each month, and the number of those that have the
## 'rejected' label
review_rejected <- iss %>%
dplyr::filter(grepl("\\[REVIEW\\]", title)) %>%
dplyr::filter(!is.na(closed)) %>%
dplyr::mutate(closedmonth = lubridate::floor_date(as.Date(closed), "month")) %>%
dplyr::group_by(closedmonth) %>%
dplyr::summarize(nbr_issues_closed = length(labels),
nbr_rejections = sum(grepl("rejected", labels))) %>%
dplyr::mutate(itype = "review")
prereview_rejected <- iss %>%
dplyr::filter(grepl("\\[PRE REVIEW\\]", title)) %>%
dplyr::filter(!is.na(closed)) %>%
dplyr::mutate(closedmonth = lubridate::floor_date(as.Date(closed), "month")) %>%
dplyr::group_by(closedmonth) %>%
dplyr::summarize(nbr_issues_closed = length(labels),
nbr_rejections = sum(grepl("rejected", labels))) %>%
dplyr::mutate(itype = "pre-review")
all_rejected <- dplyr::bind_rows(review_rejected, prereview_rejected)
## For PRE-REVIEW issues, add information about the corresponding REVIEW
## issue number
isspre <- iss %>% dplyr::filter(grepl("\\[PRE REVIEW\\]", title)) %>%
dplyr::filter(!grepl("withdrawn", labels)) %>%
dplyr::filter(!grepl("rejected", labels))
## Some titles have multiple pre-review issues. In these cases, keep the latest
isspre <- isspre %>% dplyr::arrange(desc(number)) %>%
dplyr::filter(!duplicated(title)) %>%
dplyr::mutate(title = gsub("\\[PRE REVIEW\\]: ", "", title)) %>%
dplyr::rename_all(~ paste0("prerev_", .))
papers <- papers %>% dplyr::left_join(issrev, by = "alternative.id") %>%
dplyr::left_join(isspre, by = c("prereview_issue_id" = "prerev_number")) %>%
dplyr::mutate(prerev_opened = as.Date(prerev_opened),
prerev_closed = as.Date(prerev_closed),
review_opened = as.Date(review_opened),
review_closed = as.Date(review_closed)) %>%
dplyr::mutate(days_in_pre = prerev_closed - prerev_opened,
days_in_rev = review_closed - review_opened,
to_review = !is.na(review_opened))
source_track <- c(source_track,
structure(rep("joss-github", length(setdiff(colnames(papers),
names(source_track)))),
names = setdiff(colnames(papers), names(source_track))))
## Reorder so that software repositories that were interrogated longest
## ago are checked first
tmporder <- order(match(papers$alternative.id, papers_archive$alternative.id),
na.last = FALSE)
software_urls <- papers$repo_url[tmporder]
is_github <- grepl("github", software_urls)
length(is_github)
## [1] 1673
sum(is_github)
## [1] 1585
## List the software repositories hosted outside GitHub
software_urls[!is_github]
## [1] "https://gitlab.com/moerman1/fhi-cc4s"
## [2] "https://gitlab.com/thartwig/asloth"
## [3] "https://ts-gitlab.iup.uni-heidelberg.de/utopia/utopia"
## [4] "https://gitlab.com/myqueue/myqueue"
## [5] "https://gitlab.com/fduchate/predihood"
## [6] "https://bitbucket.org/orionmhdteam/orion2_release1/src/master/"
## [7] "https://ts-gitlab.iup.uni-heidelberg.de/dorie/dorie"
## [8] "https://gitlab.com/mmartin-lagarde/exonoodle-exoplanets/-/tree/master/"
## [9] "https://gitlab.inria.fr/bramas/tbfmm"
## [10] "https://gitlab.com/pyFBS/pyFBS"
## [11] "https://bitbucket.org/meg/cbcbeat"
## [12] "https://gitlab.pasteur.fr/vlegrand/ROCK"
## [13] "https://bitbucket.org/cardosan/brightway2-temporalis"
## [14] "https://gitlab.com/jason-rumengan/pyarma"
## [15] "https://gitlab.com/ffaucher/hawen"
## [16] "https://savannah.nongnu.org/projects/complot/"
## [17] "https://gitlab.com/libreumg/dataquier.git"
## [18] "https://gitlab.inria.fr/miet/miet"
## [19] "https://bitbucket.org/hammurabicode/hamx"
## [20] "https://bitbucket.org/manuela_s/hcp/"
## [21] "https://gitlab.com/manchester_qbi/manchester_qbi_public/madym_cxx/"
## [22] "http://mutabit.com/repos.fossil/grafoscopio/"
## [23] "https://gitlab.com/gdetor/genetic_alg"
## [24] "https://ts-gitlab.iup.uni-heidelberg.de/utopia/dantro"
## [25] "https://gitlab.com/emd-dev/emd"
## [26] "https://bitbucket.org/berkeleylab/hardware-control/src/main/"
## [27] "https://gitlab.com/culturalcartography/text2map"
## [28] "https://gitlab.com/cerfacs/batman"
## [29] "https://gricad-gitlab.univ-grenoble-alpes.fr/ttk/spam/"
## [30] "https://git.rwth-aachen.de/ants/sensorlab/imea"
## [31] "https://gitlab.com/picos-api/picos"
## [32] "https://bitbucket.org/rram/dvrlib/src/joss/"
## [33] "https://gitlab.ethz.ch/holukas/dyco-dynamic-lag-compensation"
## [34] "https://gitlab.com/sails-dev/sails"
## [35] "https://bitbucket.org/clhaley/Multitaper.jl"
## [36] "https://gitlab.gwdg.de/mpievolbio-it/crbhits"
## [37] "https://gitlab.com/sissopp_developers/sissopp"
## [38] "https://gitlab.com/dlr-dw/ontocode"
## [39] "https://gitlab.com/marinvaders/marinvaders"
## [40] "https://gitlab.uliege.be/smart_grids/public/gboml"
## [41] "https://gitlab.com/vibes-developers/vibes"
## [42] "https://gitlab.com/remram44/taguette"
## [43] "https://framagit.org/GustaveCoste/off-product-environmental-impact/"
## [44] "https://gitlab.com/project-dare/dare-platform"
## [45] "https://earth.bsc.es/gitlab/wuruchi/autosubmitreact"
## [46] "https://gitlab.inria.fr/azais/treex"
## [47] "https://bitbucket.org/mpi4py/mpi4py-fft"
## [48] "https://framagit.org/GustaveCoste/eldam"
## [49] "https://bitbucket.org/basicsums/basicsums"
## [50] "https://gitlab.com/eidheim/Simple-Web-Server"
## [51] "https://gitlab.com/toposens/public/ros-packages"
## [52] "https://bitbucket.org/cdegroot/wediff"
## [53] "https://gitlab.com/QComms/cqptoolkit"
## [54] "https://gitlab.com/cracklet/cracklet.git"
## [55] "https://gitlab.kitware.com/LBM/lattice-boltzmann-solver"
## [56] "https://code.usgs.gov/umesc/quant-ecology/fishstan/"
## [57] "https://www.idpoisson.fr/fullswof/"
## [58] "https://bitbucket.org/glotzer/rowan"
## [59] "https://bitbucket.org/sciencecapsule/sciencecapsule"
## [60] "https://gitlab.com/moorepants/skijumpdesign"
## [61] "https://gitlab.com/cosmograil/PyCS3"
## [62] "https://gitlab.com/davidtourigny/dynamic-fba"
## [63] "https://bitbucket.org/miketuri/perl-spice-sim-seus/"
## [64] "https://bitbucket.org/ocellarisproject/ocellaris"
## [65] "https://gitlab.inria.fr/mosaic/bvpy"
## [66] "https://bitbucket.org/berkeleylab/esdr-pygdh/"
## [67] "https://gitlab.com/dlr-ve/autumn/"
## [68] "https://git.iws.uni-stuttgart.de/tools/frackit"
## [69] "https://bitbucket.org/likask/mofem-cephas"
## [70] "https://bitbucket.org/cmutel/brightway2"
## [71] "https://gitlab.com/LMSAL_HUB/aia_hub/aiapy"
## [72] "https://gitlab.com/materials-modeling/wulffpack"
## [73] "https://sourceforge.net/p/mcapl/mcapl_code/ci/master/tree/"
## [74] "https://bitbucket.org/dolfin-adjoint/pyadjoint"
## [75] "https://gitlab.com/tesch1/cppduals"
## [76] "https://gitlab.com/geekysquirrel/bigx"
## [77] "https://bitbucket.org/cloopsy/android/"
## [78] "https://gitlab.com/energyincities/besos/"
## [79] "https://bitbucket.org/dghoshal/frieda"
## [80] "https://gitlab.com/celliern/scikit-fdiff/"
## [81] "https://gitlab.com/gims-developers/gims"
## [82] "https://bitbucket.org/mituq/muq2.git"
## [83] "https://c4science.ch/source/tamaas/"
## [84] "https://gitlab.ruhr-uni-bochum.de/reichp2y/proppy"
## [85] "https://gitlab.com/costrouc/pysrim"
## [86] "https://doi.org/10.17605/OSF.IO/3DS6A"
## [87] "https://gitlab.com/datafold-dev/datafold/"
## [88] "https://gitlab.com/ampere2/metalwalls"
## Query the GitHub API for repository metadata, languages, topics and
## contributors of each GitHub-hosted software repository
df <- do.call(dplyr::bind_rows, lapply(software_urls[is_github], function(u) {
## Normalize the repo URL: force https, strip trailing slash and .git suffix
u0 <- gsub("^http://", "https://", gsub("\\.git$", "", gsub("/$", "", u)))
if (grepl("/tree/", u0)) {
u0 <- strsplit(u0, "/tree/")[[1]][1]
}
if (grepl("/blob/", u0)) {
u0 <- strsplit(u0, "/blob/")[[1]][1]
}
info <- try({
gh(gsub("(https://)?(www.)?github.com/", "/repos/", u0))
})
languages <- try({
gh(paste0(gsub("(https://)?(www.)?github.com/", "/repos/", u0), "/languages"),
.limit = 500)
})
topics <- try({
gh(paste0(gsub("(https://)?(www.)?github.com/", "/repos/", u0), "/topics"),
.accept = "application/vnd.github.mercy-preview+json", .limit = 500)
})
contribs <- try({
gh(paste0(gsub("(https://)?(www.)?github.com/", "/repos/", u0), "/contributors"),
.limit = 500)
})
if (!is(info, "try-error") && length(info) > 1) {
if (!is(contribs, "try-error")) {
if (length(contribs) == 0) {
repo_nbr_contribs <- repo_nbr_contribs_2ormore <- NA_integer_
} else {
repo_nbr_contribs <- length(contribs)
repo_nbr_contribs_2ormore <- sum(vapply(contribs, function(x) x$contributions >= 2, NA_integer_))
if (is.na(repo_nbr_contribs_2ormore)) {
print(contribs)
}
}
} else {
repo_nbr_contribs <- repo_nbr_contribs_2ormore <- NA_integer_
}
if (!is(languages, "try-error")) {
if (length(languages) == 0) {
repolang <- ""
} else {
repolang <- paste(paste(names(unlist(languages)),
unlist(languages), sep = ":"), collapse = ",")
}
} else {
repolang <- ""
}
if (!is(topics, "try-error")) {
if (length(topics$names) == 0) {
repotopics <- ""
} else {
repotopics <- paste(unlist(topics$names), collapse = ",")
}
} else {
repotopics <- ""
}
data.frame(repo_url = u,
repo_created = info$created_at,
repo_updated = info$updated_at,
repo_pushed = info$pushed_at,
repo_nbr_stars = info$stargazers_count,
repo_language = ifelse(!is.null(info$language),
info$language, NA_character_),
repo_languages_bytes = repolang,
repo_topics = repotopics,
repo_license = ifelse(!is.null(info$license),
info$license$key, NA_character_),
repo_nbr_contribs = repo_nbr_contribs,
repo_nbr_contribs_2ormore = repo_nbr_contribs_2ormore
)
} else {
NULL
}
})) %>%
dplyr::mutate(repo_created = as.Date(repo_created),
repo_updated = as.Date(repo_updated),
repo_pushed = as.Date(repo_pushed)) %>%
dplyr::distinct() %>%
dplyr::mutate(repo_info_obtained = lubridate::today())
stopifnot(length(unique(df$repo_url)) == length(df$repo_url))
dim(df)
## For papers not in df (i.e., for which we didn't get a valid response
## from the GitHub API query), use information from the archived data frame
dfarchive <- papers_archive %>%
dplyr::select(colnames(df)[colnames(df) %in% colnames(papers_archive)]) %>%
dplyr::filter(!(repo_url %in% df$repo_url))
df <- dplyr::bind_rows(df, dfarchive)
papers <- papers %>% dplyr::left_join(df, by = "repo_url")
source_track <- c(source_track,
structure(rep("sw-github", length(setdiff(colnames(papers),
names(source_track)))),
names = setdiff(colnames(papers), names(source_track))))
## Convert publication date to Date format
## Add information about the half year (H1, H2) of publication
## Count number of authors
papers <- papers %>% dplyr::select(-reference, -license, -link) %>%
dplyr::mutate(published.date = as.Date(published.print)) %>%
dplyr::mutate(
halfyear = paste0(year(published.date),
ifelse(month(published.date) <= 6, "H1", "H2"))
) %>% dplyr::mutate(
halfyear = factor(halfyear,
levels = paste0(rep(sort(unique(year(published.date))),
each = 2), c("H1", "H2")))
) %>% dplyr::mutate(nbr_authors = vapply(author, function(a) nrow(a), NA_integer_))
papers <- papers %>% dplyr::distinct()
source_track <- c(source_track,
structure(rep("cleanup", length(setdiff(colnames(papers),
names(source_track)))),
names = setdiff(colnames(papers), names(source_track))))
In some cases, fetching information from (e.g.) the GitHub API fails for a subset of the publications. There are also other reasons for missing values (for example, the earliest submissions do not have an associated pre-review issue). The table below lists the number of missing values for each of the variables in the data frame.
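The code that produced that table is not reproduced here; a minimal sketch of such a missing-value summary (a hypothetical reconstruction, not necessarily the report's original code) could look like this:
## Count missing values per column and display them as an interactive table
nbr_missing <- vapply(papers, function(x) sum(is.na(x)), NA_integer_)
DT::datatable(
tibble::tibble(variable = names(nbr_missing), nbr_na = nbr_missing),
options = list(scrollX = TRUE)
)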
ggplot(papers %>%
dplyr::mutate(pubmonth = lubridate::floor_date(published.date, "month")) %>%
dplyr::group_by(pubmonth) %>%
dplyr::summarize(npub = n()),
aes(x = factor(pubmonth), y = npub)) +
geom_bar(stat = "identity") + theme_minimal() +
labs(x = "", y = "Number of published papers per month", caption = dcap) +
theme(axis.title = element_text(size = 15),
axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5))
ggplot(papers %>%
dplyr::mutate(pubyear = lubridate::year(published.date)) %>%
dplyr::group_by(pubyear) %>%
dplyr::summarize(npub = n()),
aes(x = factor(pubyear), y = npub)) +
geom_bar(stat = "identity") + theme_minimal() +
labs(x = "", y = "Number of published papers per year", caption = dcap) +
theme(axis.title = element_text(size = 15),
axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5))
The plots below illustrate the fraction of pre-review and review issues closed during each month that have the ‘rejected’ label attached.
ggplot(all_rejected,
aes(x = factor(closedmonth), y = nbr_rejections/nbr_issues_closed)) +
geom_bar(stat = "identity") +
theme_minimal() +
facet_wrap(~ itype, ncol = 1) +
labs(x = "Month of issue closing", y = "Fraction of issues rejected",
caption = dcap) +
theme(axis.title = element_text(size = 15),
axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5))
Papers with 20 or more citations are grouped in the “>=20” category.
ggplot(papers %>%
dplyr::mutate(citation_count = replace(citation_count,
citation_count >= 20, ">=20")) %>%
dplyr::mutate(citation_count = factor(citation_count,
levels = c(0:20, ">=20"))) %>%
dplyr::group_by(citation_count) %>%
dplyr::tally(),
aes(x = citation_count, y = n)) +
geom_bar(stat = "identity") +
theme_minimal() +
labs(x = "Crossref citation count", y = "Number of publications", caption = dcap)The table below sorts the JOSS papers in decreasing order by the number of citations in Crossref.
DT::datatable(
papers %>%
dplyr::mutate(url = paste0("<a href='", url, "' target='_blank'>",
url,"</a>")) %>%
dplyr::arrange(desc(citation_count)) %>%
dplyr::select(title, url, published.date, citation_count),
escape = FALSE,
filter = list(position = 'top', clear = FALSE),
options = list(scrollX = TRUE)
)
plotly::ggplotly(
ggplot(papers, aes(x = published.date, y = citation_count, label = title)) +
geom_point(alpha = 0.5) + theme_bw() + scale_y_sqrt() +
geom_smooth() +
labs(x = "Date of publication", y = "Crossref citation count", caption = dcap) +
theme(axis.title = element_text(size = 15)),
tooltip = c("label", "x", "y")
)
Here, we plot the citation count for all papers published within each half year, sorted in decreasing order.
ggplot(papers %>% dplyr::group_by(halfyear) %>%
dplyr::arrange(desc(citation_count)) %>%
dplyr::mutate(idx = seq_along(citation_count)),
aes(x = idx, y = citation_count)) +
geom_point(alpha = 0.5) +
facet_wrap(~ halfyear, scales = "free") +
theme_bw() +
labs(x = "Index", y = "Crossref citation count", caption = dcap)In these plots we investigate whether the time a submission spends in the pre-review or review stage has changed over time.
ggplot(papers, aes(x = prerev_opened, y = as.numeric(days_in_pre))) +
geom_point() + geom_smooth() + theme_bw() +
labs(x = "Date of pre-review opening", y = "Number of days in pre-review",
caption = dcap) +
theme(axis.title = element_text(size = 15))
ggplot(papers, aes(x = review_opened, y = as.numeric(days_in_rev))) +
geom_point() + geom_smooth() + theme_bw() +
labs(x = "Date of review opening", y = "Number of days in review",
caption = dcap) +
theme(axis.title = element_text(size = 15))
Next, we consider the languages used by the submissions, both as reported by Whedon and based on the information encoded in available GitHub repositories (for the latter, we also record the number of bytes of code written in each language). Note that a given submission can use multiple languages.
## Language information from Whedon
sspl <- strsplit(papers$languages, ",")
all_languages <- unique(unlist(sspl))
langs <- do.call(dplyr::bind_rows, lapply(all_languages, function(l) {
data.frame(language = l,
nbr_submissions_Whedon = sum(vapply(sspl, function(v) l %in% v, 0)))
}))
## Language information from GitHub software repos
a <- lapply(strsplit(papers$repo_languages_bytes, ","), function(w) strsplit(w, ":"))
a <- a[sapply(a, length) > 0]
langbytes <- as.data.frame(t(as.data.frame(a))) %>%
setNames(c("language", "bytes")) %>%
dplyr::mutate(bytes = as.numeric(bytes)) %>%
dplyr::filter(!is.na(language)) %>%
dplyr::group_by(language) %>%
dplyr::summarize(nbr_bytes_GitHub = sum(bytes),
nbr_repos_GitHub = length(bytes)) %>%
dplyr::arrange(desc(nbr_bytes_GitHub))
langs <- dplyr::full_join(langs, langbytes, by = "language")ggplot(langs %>% dplyr::arrange(desc(nbr_submissions_Whedon)) %>%
dplyr::filter(nbr_submissions_Whedon > 10) %>%
dplyr::mutate(language = factor(language, levels = language)),
aes(x = language, y = nbr_submissions_Whedon)) +
geom_bar(stat = "identity") +
theme_bw() +
theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5)) +
labs(x = "", y = "Number of submissions", caption = dcap) +
theme(axis.title = element_text(size = 15))
DT::datatable(
langs %>% dplyr::arrange(desc(nbr_bytes_GitHub)),
escape = FALSE,
filter = list(position = 'top', clear = FALSE),
options = list(scrollX = TRUE)
)
ggplot(langs, aes(x = nbr_repos_GitHub, y = nbr_bytes_GitHub)) +
geom_point() + scale_x_log10() + scale_y_log10() + geom_smooth() +
theme_bw() +
labs(x = "Number of repos using the language",
y = "Total number of bytes of code\nwritten in the language",
caption = dcap) +
theme(axis.title = element_text(size = 15))
ggplotly(
ggplot(papers, aes(x = citation_count, y = repo_nbr_stars,
label = title)) +
geom_point(alpha = 0.5) + scale_x_sqrt() + scale_y_sqrt() +
theme_bw() +
labs(x = "Crossref citation count", y = "Number of stars, GitHub repo",
caption = dcap) +
theme(axis.title = element_text(size = 15)),
tooltip = c("label", "x", "y")
)
ggplot(papers, aes(x = as.numeric(prerev_opened - repo_created))) +
geom_histogram(bins = 50) +
theme_bw() +
labs(x = "Time (days) from repo creation to JOSS pre-review start",
caption = dcap) +
theme(axis.title = element_text(size = 15))
ggplot(papers, aes(x = as.numeric(repo_pushed - review_closed))) +
geom_histogram(bins = 50) +
theme_bw() +
labs(x = "Time (days) from closure of JOSS review to most recent commit in repo",
caption = dcap) +
theme(axis.title = element_text(size = 15)) +
facet_wrap(~ year(published.date), scales = "free_y")Submissions associated with rOpenSci and pyOpenSci are not considered here, since they are not explicitly reviewed at JOSS.
ggplot(papers %>%
dplyr::filter(!grepl("rOpenSci|pyOpenSci", prerev_labels)) %>%
dplyr::mutate(year = year(published.date)),
aes(x = nbr_reviewers)) + geom_bar() +
facet_wrap(~ year) + theme_bw() +
labs(x = "Number of reviewers", y = "Number of submissions", caption = dcap)Submissions associated with rOpenSci and pyOpenSci are not considered here, since they are not explicitly reviewed at JOSS.
reviewers <- papers %>%
dplyr::filter(!grepl("rOpenSci|pyOpenSci", prerev_labels)) %>%
dplyr::mutate(year = year(published.date)) %>%
dplyr::select(reviewers, year) %>%
tidyr::separate_rows(reviewers, sep = ",")
## Most active reviewers
DT::datatable(
reviewers %>% dplyr::group_by(reviewers) %>%
dplyr::summarize(nbr_reviews = length(year),
timespan = paste(unique(c(min(year), max(year))),
collapse = " - ")) %>%
dplyr::arrange(desc(nbr_reviews)),
escape = FALSE, rownames = FALSE,
filter = list(position = 'top', clear = FALSE),
options = list(scrollX = TRUE)
)
ggplot(papers %>%
dplyr::mutate(year = year(published.date),
`r/pyOpenSci` = factor(
grepl("rOpenSci|pyOpenSci", prerev_labels),
levels = c("TRUE", "FALSE"))),
aes(x = editor)) + geom_bar(aes(fill = `r/pyOpenSci`)) +
theme_bw() + facet_wrap(~ year, ncol = 1) +
scale_fill_manual(values = c(`TRUE` = "grey65", `FALSE` = "grey35")) +
theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5)) +
labs(x = "Editor", y = "Number of submissions", caption = dcap)all_licenses <- sort(unique(papers$repo_license))
license_levels = c(grep("apache", all_licenses, value = TRUE),
grep("bsd", all_licenses, value = TRUE),
grep("mit", all_licenses, value = TRUE),
grep("gpl", all_licenses, value = TRUE),
grep("mpl", all_licenses, value = TRUE))
license_levels <- c(license_levels, setdiff(all_licenses, license_levels))
ggplot(papers %>%
dplyr::mutate(repo_license = factor(repo_license,
levels = license_levels)),
aes(x = repo_license)) +
geom_bar() +
theme_bw() +
labs(x = "Software license", y = "Number of submissions", caption = dcap) +
theme(axis.title = element_text(size = 15),
axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5)) +
facet_wrap(~ year(published.date), scales = "free_y")## For plots below, replace licenses present in less
## than 2.5% of the submissions by 'other'
tbl <- table(papers$repo_license)
to_replace <- names(tbl[tbl <= 0.025 * nrow(papers)])
ggplot(papers %>%
dplyr::mutate(year = year(published.date)) %>%
dplyr::mutate(repo_license = replace(repo_license,
repo_license %in% to_replace,
"other")) %>%
dplyr::mutate(year = factor(year),
repo_license = factor(
repo_license,
levels = license_levels[license_levels %in% repo_license]
)) %>%
dplyr::group_by(year, repo_license, .drop = FALSE) %>%
dplyr::count() %>%
dplyr::mutate(year = as.integer(as.character(year))),
aes(x = year, y = n, fill = repo_license)) + geom_area() +
theme_minimal() +
scale_fill_brewer(palette = "Set1", name = "Software\nlicense",
na.value = "grey") +
theme(axis.title = element_text(size = 15)) +
labs(x = "Year", y = "Number of submissions", caption = dcap)ggplot(papers %>%
dplyr::mutate(year = year(published.date)) %>%
dplyr::mutate(repo_license = replace(repo_license,
repo_license %in% to_replace,
"other")) %>%
dplyr::mutate(year = factor(year),
repo_license = factor(
repo_license,
levels = license_levels[license_levels %in% repo_license]
)) %>%
dplyr::group_by(year, repo_license, .drop = FALSE) %>%
dplyr::summarize(n = n()) %>%
dplyr::mutate(freq = n/sum(n)) %>%
dplyr::mutate(year = as.integer(as.character(year))),
aes(x = year, y = freq, fill = repo_license)) + geom_area() +
theme_minimal() +
scale_fill_brewer(palette = "Set1", name = "Software\nlicense",
na.value = "grey") +
theme(axis.title = element_text(size = 15)) +
labs(x = "Year", y = "Fraction of submissions", caption = dcap)a <- unlist(strsplit(papers$repo_topics, ","))
a <- a[!is.na(a)]
topicfreq <- table(a)
colors <- viridis::viridis(100)
set.seed(1234)
wordcloud::wordcloud(
names(topicfreq), sqrt(topicfreq), min.freq = 1, max.words = 300,
random.order = FALSE, rot.per = 0.05, use.r.layout = FALSE,
colors = colors, scale = c(10, 0.1), random.color = TRUE,
ordered.colors = FALSE, vfont = c("serif", "plain")
)
Here, we take a more detailed look at the papers that cite JOSS papers, using data from the Open Citations Corpus.
citations <- tryCatch({
citecorp::oc_coci_cites(doi = papers$alternative.id) %>%
dplyr::distinct() %>%
dplyr::mutate(citation_info_obtained = as.character(lubridate::today()))
}, error = function(e) {
NULL
})
dim(citations)
## [1] 18964 8
if (!is.null(citations)) {
citations <- citations %>%
dplyr::filter(!(oci %in% citations_archive$oci))
tmpj <- rcrossref::cr_works(dois = unique(citations$citing))$data %>%
dplyr::select(contains("doi"), contains("container.title"), contains("issn"),
contains("type"), contains("publisher"), contains("prefix"))
citations <- citations %>% dplyr::left_join(tmpj, by = c("citing" = "doi"))
## bioRxiv preprints don't have a 'container.title' or 'issn', but we assume
## that they can be identified from the prefix 10.1101; set the
## container.title for these records manually. Note that counting these
## citations may count the same work twice (once for the preprint and once
## for the publication).
citations$container.title[citations$prefix == "10.1101"] <- "bioRxiv"
## JOSS is represented by 'The Journal of Open Source Software' as well as
## 'Journal of Open Source Software'
citations$container.title[citations$container.title ==
"Journal of Open Source Software"] <-
"The Journal of Open Source Software"
## Remove real self citations (cited DOI = citing DOI)
citations <- citations %>% dplyr::filter(cited != citing)
## Merge with the archive
citations <- dplyr::bind_rows(citations, citations_archive)
} else {
citations <- citations_archive
if (is.null(citations[["citation_info_obtained"]])) {
citations$citation_info_obtained <- NA_character_
}
}
citations$citation_info_obtained[is.na(citations$citation_info_obtained)] <-
"2021-08-11"
write.table(citations, file = "joss_submission_citations.tsv",
row.names = FALSE, col.names = TRUE, sep = "\t", quote = FALSE)
## Number of JOSS papers with >0 citations included in this collection
length(unique(citations$cited))
## [1] 1026
## Number of JOSS papers with >0 citations according to Crossref
length(which(papers$citation_count > 0))
## [1] 1127
## Number of citations from Open Citations Corpus vs Crossref
df0 <- papers %>% dplyr::select(doi, citation_count) %>%
dplyr::full_join(citations %>% dplyr::group_by(cited) %>%
dplyr::tally() %>%
dplyr::mutate(n = replace(n, is.na(n), 0)),
by = c("doi" = "cited"))## [1] 22529
## [1] 18945
## Ratio of total citation count Open Citations Corpus/Crossref
sum(df0$n, na.rm = TRUE)/sum(df0$citation_count, na.rm = TRUE)
## [1] 0.8409162
ggplot(df0, aes(x = citation_count, y = n)) +
geom_abline(slope = 1, intercept = 0) +
geom_point(size = 3, alpha = 0.5) +
labs(x = "Crossref citation count", y = "Open Citations Corpus citation count",
caption = dcap) +
theme_bw()
## Zoom in
ggplot(df0, aes(x = citation_count, y = n)) +
geom_abline(slope = 1, intercept = 0) +
geom_point(size = 3, alpha = 0.5) +
labs(x = "Crossref citation count", y = "Open Citations Corpus citation count",
caption = dcap) +
theme_bw() +
coord_cartesian(xlim = c(0, 75), ylim = c(0, 75))
topcit <- citations %>% dplyr::group_by(container.title) %>%
dplyr::summarize(nbr_citations_of_joss_papers = length(cited),
nbr_cited_joss_papers = length(unique(cited)),
nbr_citing_papers = length(unique(citing)),
nbr_selfcitations_of_joss_papers = sum(author_sc == "yes"),
fraction_selfcitations = signif(nbr_selfcitations_of_joss_papers /
nbr_citations_of_joss_papers, digits = 3)) %>%
dplyr::arrange(desc(nbr_cited_joss_papers))
DT::datatable(topcit,
escape = FALSE, rownames = FALSE,
filter = list(position = 'top', clear = FALSE),
options = list(scrollX = TRUE))
plotly::ggplotly(
ggplot(topcit, aes(x = nbr_citations_of_joss_papers, y = nbr_cited_joss_papers,
label = container.title)) +
geom_abline(slope = 1, intercept = 0, linetype = "dashed", color = "grey") +
geom_point(size = 3, alpha = 0.5) +
theme_bw() +
labs(caption = dcap, x = "Number of citations of JOSS papers",
y = "Number of cited JOSS papers")
)
plotly::ggplotly(
ggplot(topcit, aes(x = nbr_citations_of_joss_papers, y = nbr_cited_joss_papers,
label = container.title)) +
geom_abline(slope = 1, intercept = 0, linetype = "dashed", color = "grey") +
geom_point(size = 3, alpha = 0.5) +
theme_bw() +
coord_cartesian(xlim = c(0, 100), ylim = c(0, 50)) +
labs(caption = dcap, x = "Number of citations of JOSS papers",
y = "Number of cited JOSS papers")
)
The tibble object with all data collected above is serialized to a file that can be downloaded and reused.
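The serialization call itself is not reproduced here; a minimal sketch, assuming the file name under which the object is later read back from GitHub (see below), would be:
## Save the assembled tibble and preview its first records
saveRDS(papers, file = "joss_submission_analytics.rds")
head(as.data.frame(papers))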
## alternative.id container.title created deposited
## 1 10.21105/joss.00295 The Journal of Open Source Software 2017-07-26 2019-10-01
## 2 10.21105/joss.02165 Journal of Open Source Software 2020-09-02 2020-09-02
## 3 10.21105/joss.02844 Journal of Open Source Software 2021-01-17 2021-01-17
## 4 10.21105/joss.01837 Journal of Open Source Software 2019-12-03 2019-12-03
## 5 10.21105/joss.03917 Journal of Open Source Software 2021-12-02 2021-12-02
## 6 10.21105/joss.02804 Journal of Open Source Software 2020-12-14 2020-12-14
## published.print doi indexed issn issue issued
## 1 2017-07-26 10.21105/joss.00295 2022-03-30 2475-9066 15 2017-07-26
## 2 2020-09-02 10.21105/joss.02165 2022-03-30 2475-9066 53 2020-09-02
## 3 2021-01-17 10.21105/joss.02844 2022-03-30 2475-9066 57 2021-01-17
## 4 2019-12-03 10.21105/joss.01837 2022-03-30 2475-9066 44 2019-12-03
## 5 2021-12-02 10.21105/joss.03917 2022-03-30 2475-9066 68 2021-12-02
## 6 2020-12-14 10.21105/joss.02804 2022-03-30 2475-9066 56 2020-12-14
## member page prefix publisher score source reference.count
## 1 8722 295 10.21105 The Open Journal 0 Crossref 5
## 2 8722 2165 10.21105 The Open Journal 0 Crossref 21
## 3 8722 2844 10.21105 The Open Journal 0 Crossref 17
## 4 8722 1837 10.21105 The Open Journal 0 Crossref 14
## 5 8722 3917 10.21105 The Open Journal 0 Crossref 34
## 6 8722 2804 10.21105 The Open Journal 0 Crossref 28
## references.count is.referenced.by.count
## 1 5 0
## 2 21 0
## 3 17 3
## 4 14 2
## 5 34 0
## 6 28 0
## title
## 1 biotmle: Targeted Learning for Biomarker Discovery
## 2 Utopia: A Comprehensive and Collaborative Modeling Framework for Complex and Evolving Systems
## 3 c-lasso - a Python package for constrained sparse and robust regression and classification
## 4 CoralP: Flexible visualization of the human phosphatome
## 5 CR-Sparse: Hardware accelerated functional algorithms for sparse signal processing in Python using JAX
## 6 gospl: Global Scalable Paleo Landscape Evolution
## type url volume
## 1 journal-article http://dx.doi.org/10.21105/joss.00295 2
## 2 journal-article http://dx.doi.org/10.21105/joss.02165 5
## 3 journal-article http://dx.doi.org/10.21105/joss.02844 6
## 4 journal-article http://dx.doi.org/10.21105/joss.01837 4
## 5 journal-article http://dx.doi.org/10.21105/joss.03917 6
## 6 journal-article http://dx.doi.org/10.21105/joss.02804 5
## short.container.title
## 1 JOSS
## 2 JOSS
## 3 JOSS
## 4 JOSS
## 5 JOSS
## 6 JOSS
## author
## 1 http://orcid.org/0000-0002-7127-2789, http://orcid.org/0000-0003-2680-3066, http://orcid.org/0000-0002-3769-0127, FALSE, FALSE, FALSE, Nima, Weixin, Alan, S. Hejazi, Cai, E. Hubbard, first, additional, additional
## 2 http://orcid.org/0000-0002-4667-3652, http://orcid.org/0000-0001-6343-3004, http://orcid.org/0000-0001-7787-9496, http://orcid.org/0000-0003-3858-0904, NA, FALSE, FALSE, FALSE, FALSE, NA, Lukas, Benjamin, Harald, Yunus, Julian, Riedel, Herdeanu, Mack, Sevinchan, Weninger, first, additional, additional, additional, additional
## 3 Léo, Patrick, Christian, Simpson, Combettes, Müller, first, additional, additional, NA, NA, http://orcid.org/0000-0002-3821-7083, NA, NA, FALSE
## 4 Amit, Erika, Marielle, Eric, Douglas, Min, Deoudes, Bond, Davis, Phanstiel, first, additional, additional, additional, additional, NA, NA, NA, http://orcid.org/0000-0003-4051-3217, http://orcid.org/0000-0003-2123-0051, NA, NA, NA, FALSE, FALSE
## 5 http://orcid.org/0000-0003-2217-4768, FALSE, Shailesh, Kumar, first
## 6 http://orcid.org/0000-0001-6095-7689, http://orcid.org/0000-0003-2595-2414, http://orcid.org/0000-0002-6751-4976, FALSE, FALSE, FALSE, Tristan, Claire, Sabin, Salles, Mallard, Zahirovic, first, additional, additional
## citation_count
## 1 0
## 2 0
## 3 3
## 4 3
## 5 0
## 6 0
## api_title
## 1 biotmle: Targeted Learning for Biomarker Discovery
## 2 Utopia: A Comprehensive and Collaborative Modeling Framework for Complex and Evolving Systems
## 3 c-lasso - a Python package for constrained sparse and robust regression and classification
## 4 CoralP: Flexible visualization of the human phosphatome
## 5 CR-Sparse: Hardware accelerated functional algorithms for sparse signal processing in Python using JAX
## 6 gospl: Global Scalable Paleo Landscape Evolution
## api_state editor reviewers nbr_reviewers
## 1 accepted @karthik @NelleV 1
## 2 accepted @arfon @platipodium 1
## 3 accepted @mjsottile @jbytecode,@glemaitre 2
## 4 accepted @majensen @daissi,@stulacy 2
## 5 accepted @pdebuyl @Saran-nns,@mirca 2
## 6 accepted @kbarnhart @johnjarmitage,@cmshobe 2
## repo_url review_issue_id
## 1 https://github.com/nhejazi/biotmle 295
## 2 https://ts-gitlab.iup.uni-heidelberg.de/utopia/utopia 2165
## 3 https://github.com/Leo-Simpson/c-lasso 2844
## 4 https://github.com/PhanstielLab/coralp 1837
## 5 https://github.com/carnotresearch/cr-sparse 3917
## 6 https://github.com/Geodels/gospl/ 2804
## prereview_issue_id languages
## 1 156 Makefile,R,TeX
## 2 2149 CMake,C++,TeX,Python
## 3 2811 Python,Makefile,HTML,JavaScript,CSS
## 4 1803 R,TeX,CSS,JavaScript
## 5 3913 Python,Shell,TeX
## 6 2771 Shell,Fortran,Python,Jupyter Notebook,TeX
## archive_doi
## 1 http://dx.doi.org/10.5281/zenodo.834849
## 2 https://doi.org/10.5281/zenodo.4011979
## 3 https://doi.org/10.6084/m9.figshare.13589585.v1
## 4 https://doi.org/10.5281/zenodo.3560845
## 5 https://doi.org/10.5281/zenodo.5749792
## 6 https://doi.org/10.5281/zenodo.4319332
## review_title
## 1 biotmle: Targeted Learning for Biomarker Discovery
## 2 Utopia: A Comprehensive and Collaborative Modeling Framework for Complex and Evolving Systems
## 3 c-lasso: a Python package for constrained sparse regression and classification
## 4 CoralP: Flexible visualization of the human phosphatome
## 5 CR-Sparse: Hardware accelerated functional algorithms for sparse signal processing in Python using JAX
## 6 gospl: Global Scalable Paleo Landscape Evolution
## review_number review_state review_opened review_closed review_ncomments
## 1 295 closed 2017-06-14 2017-07-26 22
## 2 2165 closed 2020-03-17 2020-09-02 96
## 3 2844 closed 2020-11-18 2021-01-17 57
## 4 1837 closed 2019-10-26 2019-12-03 39
## 5 3917 closed 2021-11-16 2021-12-02 59
## 6 2804 closed 2020-10-30 2020-12-14 69
## review_labels
## 1 accepted,recommend-accept,published
## 2 accepted,recommend-accept,published
## 3 accepted,TeX,Python,recommend-accept,published
## 4 accepted,recommend-accept,published
## 5 accepted,TeX,Shell,Python,recommend-accept,published
## 6 accepted,Shell,Python,Fortran,recommend-accept,published
## prerev_title
## 1 biotmle: Targeted Learning for Biomarker Discovery
## 2 Utopia: A Comprehensive and Collaborative Modeling Framework for Complex and Evolving Systems
## 3 c-lasso: a Python package for constrained sparse regression and classification
## 4 CoralP: Flexible visualization of the human phosphatome
## 5 CR-Sparse: Hardware accelerated functional algorithms for sparse signal processing in Python using JAX
## 6 gospl: Global Scalable Paleo Landscape Evolution
## prerev_state prerev_opened prerev_closed prerev_ncomments
## 1 closed 2017-01-09 2017-06-14 27
## 2 closed 2020-03-08 2020-03-17 39
## 3 closed 2020-11-02 2020-11-18 37
## 4 closed 2019-10-12 2019-10-26 20
## 5 closed 2021-11-12 2021-11-16 29
## 6 closed 2020-10-23 2020-10-30 25
## prerev_labels days_in_pre days_in_rev to_review repo_created
## 1 rOpenSci 156 days 42 days TRUE 2016-08-16
## 2 Python,C++,CMake 9 days 169 days TRUE <NA>
## 3 TeX,Python 16 days 60 days TRUE 2019-11-19
## 4 TeX,R,CSS 14 days 38 days TRUE 2019-04-02
## 5 TeX,Shell,Python 4 days 16 days TRUE 2020-12-22
## 6 Shell,Python,Fortran 7 days 45 days TRUE 2019-09-07
## repo_updated repo_pushed repo_nbr_stars repo_language
## 1 2021-10-14 2021-10-14 4 R
## 2 <NA> <NA> NA <NA>
## 3 2021-10-28 2021-05-05 20 HTML
## 4 2021-08-09 2021-08-09 0 R
## 5 2022-05-25 2021-12-09 39 Jupyter Notebook
## 6 2022-05-13 2022-03-14 25 Python
## repo_languages_bytes
## 1 R:33321,TeX:5137,Makefile:763
## 2 <NA>
## 3 HTML:1467883,Python:320162,Jupyter Notebook:64156,JavaScript:23193,CSS:11680,TeX:10492,Makefile:201
## 4 R:232199,TeX:35959,CSS:10562,JavaScript:10399
## 5 Jupyter Notebook:1232323,Python:888520,TeX:18209,Shell:187
## 6 Python:375345,Jupyter Notebook:60269,Fortran:54463,TeX:9682,Dockerfile:2867,Shell:2265
## repo_topics
## 1 bioinformatics,biostatistics,bioconductor,statistics,machine-learning,causal-inference,r,bioconductor-packages,bioconductor-package,targeted-learning,biomarker-discovery,computational-biology
## 2 <NA>
## 3
## 4
## 5 sparse-representations,jax,wavelets,convex-optimization,linear-operators,compressive-sensing,functional-programming,l1-regularization,sparse-linear-systems,first-order-conic-solvers,lasso
## 6 paleogeography,sediment-transport,paleoclimate,landscape,landscape-evolution-model,basin-modeling,sedimentation,erosion-process,lithology,compaction
## repo_license repo_nbr_contribs repo_nbr_contribs_2ormore repo_info_obtained
## 1 other 5 5 2022-06-01
## 2 <NA> NA NA <NA>
## 3 mit 2 2 2022-05-25
## 4 mit 6 5 2022-06-15
## 5 apache-2.0 2 1 2022-06-08
## 6 gpl-3.0 3 3 2022-06-01
## published.date halfyear nbr_authors
## 1 2017-07-26 2017H2 3
## 2 2020-09-02 2020H2 5
## 3 2021-01-17 2021H1 3
## 4 2019-12-03 2019H2 5
## 5 2021-12-02 2021H2 1
## 6 2020-12-14 2020H2 3
To read the current version of this file directly from GitHub, use the following code:
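papers <- readRDS(gzcon(url("https://github.com/openjournals/joss-analytics/blob/gh-pages/joss_submission_analytics.rds?raw=true")))
Finally, the session information for the R session used to generate this report is shown below.
sessionInfo()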
## R version 4.2.1 (2022-06-23)
## Platform: x86_64-apple-darwin17.0 (64-bit)
## Running under: macOS Big Sur ... 10.16
##
## Matrix products: default
## BLAS: /Library/Frameworks/R.framework/Versions/4.2/Resources/lib/libRblas.0.dylib
## LAPACK: /Library/Frameworks/R.framework/Versions/4.2/Resources/lib/libRlapack.dylib
##
## locale:
## [1] en_US.UTF-8/en_US.UTF-8/en_US.UTF-8/C/en_US.UTF-8/en_US.UTF-8
##
## attached base packages:
## [1] stats graphics grDevices utils datasets methods base
##
## other attached packages:
## [1] readr_2.1.2 citecorp_0.3.0 plotly_4.10.0 DT_0.23
## [5] jsonlite_1.8.0 purrr_0.3.4 gh_1.3.0 lubridate_1.8.0
## [9] ggplot2_3.3.6 tidyr_1.2.0 dplyr_1.0.9 rcrossref_1.1.0.99
## [13] tibble_3.1.7
##
## loaded via a namespace (and not attached):
## [1] viridis_0.6.2 httr_1.4.3 sass_0.4.1 splines_4.2.1
## [5] bit64_4.0.5 vroom_1.5.7 viridisLite_0.4.0 bslib_0.3.1
## [9] shiny_1.7.1 highr_0.9 triebeard_0.3.0 urltools_1.7.3
## [13] yaml_2.3.5 lattice_0.20-45 pillar_1.7.0 glue_1.6.2
## [17] digest_0.6.29 RColorBrewer_1.1-3 promises_1.2.0.1 colorspace_2.0-3
## [21] Matrix_1.4-1 htmltools_0.5.2 httpuv_1.6.5 plyr_1.8.7
## [25] pkgconfig_2.0.3 httpcode_0.3.0 xtable_1.8-4 gitcreds_0.1.1
## [29] scales_1.2.0 whisker_0.4 later_1.3.0 tzdb_0.3.0
## [33] mgcv_1.8-40 generics_0.1.2 farver_2.1.0 ellipsis_0.3.2
## [37] withr_2.5.0 lazyeval_0.2.2 cli_3.3.0 magrittr_2.0.3
## [41] crayon_1.5.1 mime_0.12 evaluate_0.15 fansi_1.0.3
## [45] nlme_3.1-157 xml2_1.3.3 tools_4.2.1 data.table_1.14.2
## [49] hms_1.1.1 lifecycle_1.0.1 stringr_1.4.0 munsell_0.5.0
## [53] compiler_4.2.1 jquerylib_0.1.4 rlang_1.0.3 grid_4.2.1
## [57] htmlwidgets_1.5.4 crosstalk_1.2.0 miniUI_0.1.1.1 labeling_0.4.2
## [61] rmarkdown_2.14 gtable_0.3.0 curl_4.3.2 fauxpas_0.5.0
## [65] R6_2.5.1 gridExtra_2.3 knitr_1.39 fastmap_1.1.0
## [69] bit_4.0.4 utf8_1.2.2 stringi_1.7.6 parallel_4.2.1
## [73] crul_1.2.0 Rcpp_1.0.8.3 vctrs_0.4.1 wordcloud_2.6
## [77] tidyselect_1.1.2 xfun_0.31